1 Import Libraries

knitr::opts_chunk$set(echo = TRUE)
library(knitr) # for knitting markdown files
library(dplyr)
## 
## Attaching package: 'dplyr'
## The following objects are masked from 'package:stats':
## 
##     filter, lag
## The following objects are masked from 'package:base':
## 
##     intersect, setdiff, setequal, union
library(ggplot2) # for plotting
library(broom)
library(reshape2)
#library(readr)
#library(readxl)
#library(Ecdat)
library(janitor)
## 
## Attaching package: 'janitor'
## The following objects are masked from 'package:stats':
## 
##     chisq.test, fisher.test
#library(plm)
#library(pwt9)
#library(quarto)
library(renv)
## 
## Attaching package: 'renv'
## The following objects are masked from 'package:stats':
## 
##     embed, update
## The following objects are masked from 'package:utils':
## 
##     history, upgrade
## The following objects are masked from 'package:base':
## 
##     autoload, load, remove
library(shiny)
## 
## Attaching package: 'shiny'
## The following object is masked from 'package:renv':
## 
##     isolate
library(targets)
library(testthat)
## 
## Attaching package: 'testthat'
## The following object is masked from 'package:targets':
## 
##     matches
## The following object is masked from 'package:dplyr':
## 
##     matches
library(tidyverse)
## ── Attaching packages
## ───────────────────────────────────────
## tidyverse 1.3.2 ──
## ✔ tibble  3.1.8     ✔ purrr   0.3.5
## ✔ tidyr   1.2.1     ✔ stringr 1.4.1
## ✔ readr   2.1.3     ✔ forcats 0.5.2
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## ✖ readr::edition_get()   masks testthat::edition_get()
## ✖ dplyr::filter()        masks stats::filter()
## ✖ purrr::is_null()       masks testthat::is_null()
## ✖ dplyr::lag()           masks stats::lag()
## ✖ readr::local_edition() masks testthat::local_edition()
## ✖ tidyr::matches()       masks testthat::matches(), targets::matches(), dplyr::matches()
## ✖ purrr::modify()        masks renv::modify()
library(tibble)
library(lubridate)
## Loading required package: timechange
## 
## Attaching package: 'lubridate'
## 
## The following objects are masked from 'package:base':
## 
##     date, intersect, setdiff, union
library(purrr)
library(Hmisc) # for dependence tests
## Loading required package: lattice
## Loading required package: survival
## Loading required package: Formula
## 
## Attaching package: 'Hmisc'
## 
## The following object is masked from 'package:testthat':
## 
##     describe
## 
## The following objects are masked from 'package:dplyr':
## 
##     src, summarize
## 
## The following objects are masked from 'package:base':
## 
##     format.pval, units
library(plotly) # for interactive plots
## 
## Attaching package: 'plotly'
## 
## The following object is masked from 'package:Hmisc':
## 
##     subplot
## 
## The following object is masked from 'package:ggplot2':
## 
##     last_plot
## 
## The following object is masked from 'package:stats':
## 
##     filter
## 
## The following object is masked from 'package:graphics':
## 
##     layout
library(hrbrthemes) 
## NOTE: Either Arial Narrow or Roboto Condensed fonts are required to use these themes.
##       Please use hrbrthemes::import_roboto_condensed() to install Roboto Condensed and
##       if Arial Narrow is not on your system, please see https://bit.ly/arialnarrow
library(xts) # for time series objects
## Loading required package: zoo
## 
## Attaching package: 'zoo'
## 
## The following objects are masked from 'package:base':
## 
##     as.Date, as.Date.numeric
## 
## 
## Attaching package: 'xts'
## 
## The following objects are masked from 'package:dplyr':
## 
##     first, last
#library(zoo)
library(seasonal) # for seasonality of time series
## 
## Attaching package: 'seasonal'
## 
## The following object is masked from 'package:tibble':
## 
##     view
library(tsbox)
library(forecast) # for forecasting time series
## Registered S3 method overwritten by 'quantmod':
##   method            from
##   as.zoo.data.frame zoo
library(tseries) # for unit root tests
#library(seasonalview)
#library(autoplotly)

library(tidyverse) # general
#library(ggalt) # dumbbell plots
library(plotly) #for drawing interactive plots
library(ggridges) #for drawing density gradient
library(shades) #edit colors in natural ways:
library(urca) 
library(tseries)
library(vars) # for VAR models
## Loading required package: MASS
## 
## Attaching package: 'MASS'
## 
## The following object is masked from 'package:plotly':
## 
##     select
## 
## The following object is masked from 'package:dplyr':
## 
##     select
## 
## Loading required package: strucchange
## Loading required package: sandwich
## 
## Attaching package: 'strucchange'
## 
## The following object is masked from 'package:stringr':
## 
##     boundary
## 
## Loading required package: lmtest
library(dynlm)
library(Metrics)
## 
## Attaching package: 'Metrics'
## 
## The following object is masked from 'package:forecast':
## 
##     accuracy
library(htmlTable) # for showing tables
#library(keras)
#library(tensorflow)
#install_keras()
#install_tensorflow(version = "nightly")
library(reticulate)
## 
## Attaching package: 'reticulate'
## The following object is masked from 'package:renv':
## 
##     use_python
Sys.unsetenv("RETICULATE_PYTHON") 
use_virtualenv("~/.virtualenvs/r-reticulate")
library(keras)
## 
## Attaching package: 'keras'
## 
## The following object is masked from 'package:renv':
## 
##     use_python
colorize <- function(x, color) {
  # Wrap `x` in color markup appropriate for the current knitr output
  # format; return the text unchanged for any other format.
  if (knitr::is_latex_output()) {
    return(sprintf("\\textcolor{%s}{%s}", color, x))
  }
  if (knitr::is_html_output()) {
    return(sprintf("<span style='color: %s;'>%s</span>", color, x))
  }
  x
}
#webshot::install_phantomjs()

2 Introduction

2.1 Describe Dataset

The dataset used for this project is Daily Delhi Climate, which consists of the following columns:

  1. date: Date of format YYYY-MM-DD starting from “2013-01-01” and ending in “2017-01-01”.
  2. meantemp: Mean temperature averaged out from multiple 3 hour intervals in a day.
  3. humidity: Humidity value for the day (units are grams of water vapor per cubic meter volume of air).
  4. wind_speed: Wind speed measured in kmph.
  5. meanpressure: Pressure reading of weather (measured in atm).

2.2 Goal and Procedure

The goal of this project is to analyze and forecast the mean temperature of Delhi, which is recorded in the meantemp column. For some models, the wind speed of Delhi is also predicted.

The following four forecasting models are used for this work: autoregressive–moving-average model (ARMA), vector autoregression (VAR), feedforward neural network (NN), and long short-term memory (LSTM) neural network. After importing the dataset, outliers are removed in the Preprocessing and Analysis section. Then the meantemp column is assigned to a time series object in the Construct Time Series section for further processing. After detecting seasonalities using plots, the time series is seasonally adjusted using the X13-ARIMA-SEATS decomposition model. Then, the remaining trend is removed in Detrend.

Before forecasting the time series, I check for stationarity of the time series, as stationarity is an assumption in the ARIMA model. For this purpose, unit root tests are applied in the Stationarity section.

Finally, I used ARIMA model to forecast the time series in Forecast Time Series section.

df_train <- read_csv("data/DailyDelhiClimateTrain.csv")
## Rows: 1462 Columns: 5
## ── Column specification ────────────────────────────────────────────────────────
## Delimiter: ","
## dbl  (4): meantemp, humidity, wind_speed, meanpressure
## date (1): date
## 
## ℹ Use `spec()` to retrieve the full column specification for this data.
## ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
df_test <- read_csv("data/DailyDelhiClimateTest.csv")
## Rows: 114 Columns: 5
## ── Column specification ────────────────────────────────────────────────────────
## Delimiter: ","
## dbl  (4): meantemp, humidity, wind_speed, meanpressure
## date (1): date
## 
## ℹ Use `spec()` to retrieve the full column specification for this data.
## ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
summary(df_train)
##       date               meantemp        humidity        wind_speed    
##  Min.   :2013-01-01   Min.   : 6.00   Min.   : 13.43   Min.   : 0.000  
##  1st Qu.:2014-01-01   1st Qu.:18.86   1st Qu.: 50.38   1st Qu.: 3.475  
##  Median :2015-01-01   Median :27.71   Median : 62.62   Median : 6.222  
##  Mean   :2015-01-01   Mean   :25.50   Mean   : 60.77   Mean   : 6.802  
##  3rd Qu.:2016-01-01   3rd Qu.:31.31   3rd Qu.: 72.22   3rd Qu.: 9.238  
##  Max.   :2017-01-01   Max.   :38.71   Max.   :100.00   Max.   :42.220  
##   meanpressure     
##  Min.   :  -3.042  
##  1st Qu.:1001.580  
##  Median :1008.563  
##  Mean   :1011.105  
##  3rd Qu.:1014.945  
##  Max.   :7679.333
df_train |> dplyr::glimpse()
## Rows: 1,462
## Columns: 5
## $ date         <date> 2013-01-01, 2013-01-02, 2013-01-03, 2013-01-04, 2013-01-…
## $ meantemp     <dbl> 10.000000, 7.400000, 7.166667, 8.666667, 6.000000, 7.0000…
## $ humidity     <dbl> 84.50000, 92.00000, 87.00000, 71.33333, 86.83333, 82.8000…
## $ wind_speed   <dbl> 0.0000000, 2.9800000, 4.6333333, 1.2333333, 3.7000000, 1.…
## $ meanpressure <dbl> 1015.667, 1017.800, 1018.667, 1017.167, 1016.500, 1018.00…
df_test |> summary()
##       date               meantemp        humidity       wind_speed    
##  Min.   :2017-01-01   Min.   :11.00   Min.   :17.75   Min.   : 1.387  
##  1st Qu.:2017-01-29   1st Qu.:16.44   1st Qu.:39.62   1st Qu.: 5.564  
##  Median :2017-02-26   Median :19.88   Median :57.75   Median : 8.069  
##  Mean   :2017-02-26   Mean   :21.71   Mean   :56.26   Mean   : 8.144  
##  3rd Qu.:2017-03-26   3rd Qu.:27.71   3rd Qu.:71.90   3rd Qu.:10.069  
##  Max.   :2017-04-24   Max.   :34.50   Max.   :95.83   Max.   :19.314  
##   meanpressure 
##  Min.   :  59  
##  1st Qu.:1007  
##  Median :1013  
##  Mean   :1004  
##  3rd Qu.:1017  
##  Max.   :1023
df_test |> dplyr::glimpse()
## Rows: 114
## Columns: 5
## $ date         <date> 2017-01-01, 2017-01-02, 2017-01-03, 2017-01-04, 2017-01-…
## $ meantemp     <dbl> 15.91304, 18.50000, 17.11111, 18.70000, 18.38889, 19.3181…
## $ humidity     <dbl> 85.86957, 77.22222, 81.88889, 70.05000, 74.94444, 79.3181…
## $ wind_speed   <dbl> 2.743478, 2.894444, 4.016667, 4.545000, 3.300000, 8.68181…
## $ meanpressure <dbl> 59.000, 1018.278, 1018.333, 1015.700, 1014.333, 1011.773,…

3 Visualize Data

Below we can see an interactive plot of the time series.

# Interactive area/line plot of the daily mean temperature.
p <- df_train |>
  ggplot(aes(x = date, y = meantemp)) +
    geom_area(fill = "#69b3a2", alpha = 0.5) +
    geom_line(color = "#69b3a2") +
    # Fixed: label previously read "bitcoin price ($)" — a leftover from
    # the template this plot was adapted from; the series is meantemp.
    ylab("mean temperature") +
    theme_ipsum()

# Turn it interactive with ggplotly
p <- ggplotly(p)
p

4 Preprocessing

4.1 Treat Outliers: Manual

Evidenced by the plot of the time series, an outlier at the last observation (last row of the dataframe) can be detected. It causes an abrupt decrease in the value of temperature. This would lead to problems in further analysis and forecasting models. Therefore, the last observation’s value is replaced with the previous one. In Treat Outliers: Automatic, an automatic procedure for treating outliers is performed.

# Replace the outlying final observation (2017-01-01) with the previous
# day's value (2016-12-31) rather than dropping the row, so the daily
# series keeps a regular, gap-free date index.
previous_value <- df_train$meantemp[df_train$date == as.Date('2016-12-31')]

df_train$meantemp[df_train$date == as.Date('2017-01-01')]<- previous_value 
# Alternative considered: drop the last row entirely.
#df_train <- head(df_train, -1)
head(df_train)
## # A tibble: 6 × 5
##   date       meantemp humidity wind_speed meanpressure
##   <date>        <dbl>    <dbl>      <dbl>        <dbl>
## 1 2013-01-01    10        84.5       0           1016.
## 2 2013-01-02     7.4      92         2.98        1018.
## 3 2013-01-03     7.17     87         4.63        1019.
## 4 2013-01-04     8.67     71.3       1.23        1017.
## 5 2013-01-05     6        86.8       3.7         1016.
## 6 2013-01-06     7        82.8       1.48        1018
tail(df_train)
## # A tibble: 6 × 5
##   date       meantemp humidity wind_speed meanpressure
##   <date>        <dbl>    <dbl>      <dbl>        <dbl>
## 1 2016-12-27     16.8     67.6       8.34        1017.
## 2 2016-12-28     17.2     68.0       3.55        1016.
## 3 2016-12-29     15.2     87.9       6           1017.
## 4 2016-12-30     14.1     89.7       6.27        1018.
## 5 2016-12-31     15.1     87         7.32        1016.
## 6 2017-01-01     15.1    100         0           1016

The plot of data after removing the outlier is visualized below:

# Re-plot the series after the manual outlier replacement.
p <- df_train |>
  ggplot(aes(x = date, y = meantemp)) +
    geom_area(fill = "#69b3a2", alpha = 0.5) +
    geom_line(color = "#69b3a2") +
    # Fixed: label previously read "bitcoin price ($)" — a leftover from
    # the template this plot was adapted from; the series is meantemp.
    ylab("mean temperature") +
    theme_ipsum()

# Turn it interactive with ggplotly
p <- ggplotly(p)
p

In below we find if there is any missing date

date_range <- seq(min(df_train$date), max(df_train$date), by = 1) 
date_range[!date_range %in% df_train$date] 
## Date of length 0

4.2 Construct Time Series

The meantemp column is used to create time series data. The time series is assigned to xts objects. But since many functions later require a ts object, each time an xts object is defined, a ts counterpart is created for further possible use. The conversion is performed using the tsbox::ts_ts function.

min(df_train$date)
## [1] "2013-01-01"
max(df_train$date)
## [1] "2017-01-01"
#ts_train <- zoo(df_train$meantemp, df_train$date)

xts_train_meantemp <- xts(df_train$meantemp, order.by=df_train$date, "%Y-%m-%d")
class(xts_train_meantemp)
## [1] "xts" "zoo"
head(xts_train_meantemp)
##                 [,1]
## 2013-01-01 10.000000
## 2013-01-02  7.400000
## 2013-01-03  7.166667
## 2013-01-04  8.666667
## 2013-01-05  6.000000
## 2013-01-06  7.000000
tail(xts_train_meantemp)
##                [,1]
## 2016-12-27 16.85000
## 2016-12-28 17.21739
## 2016-12-29 15.23810
## 2016-12-30 14.09524
## 2016-12-31 15.05263
## 2017-01-01 15.05263
# convert xts to ts

## Create a daily Date object for ts
#inds <- seq(as.Date("2013-01-01"), as.Date("2017-01-01"), by = "day")

#set.seed(25)
#ts_train <- ts(df_train$meantemp,     # random data
#           start = c(2013, as.numeric(format(inds[1], "%j"))),
#           frequency = 365)


#ts_train <- ts(df_train$meantemp, start = decimal_date(ymd("2013-01-01")), frequency = 365.25 / 7)

Convert XTS objects to TS objects:

ts_train_meantemp <- ts_ts(xts_train_meantemp)
head(ts_train_meantemp)
## Time Series:
## Start = 2013 
## End = 2013.01368953503 
## Frequency = 365.2425 
## [1] 10.000000  7.400000  7.166667  8.666667  6.000000  7.000000
tail(ts_train_meantemp)
## Time Series:
## Start = 2016.98639260218 
## End = 2017.00008213721 
## Frequency = 365.2425 
## [1] 16.85000 17.21739 15.23810 14.09524 15.05263 15.05263

A static plot is shown in the following:

ts_plot(xts_train_meantemp)

4.3 Treat Outliers: Automatic

The tsclean function identifies and replaces outliers and missing values in a time series. It uses Friedman’s Super Smoother estimator for non-seasonal series and a robust STL decomposition for seasonal series. To estimate missing values and outlier replacements, linear interpolation is used on the (possibly seasonally adjusted) series. In our case, the robust STL decomposition is used due to the existence of seasonality.

ts_plot(xts_train_meantemp)

xts_train_meantemp <- tsclean(xts_train_meantemp)
ts_plot(xts_train_meantemp)

I speculate that since there were no high fluctuations in the time series, there was no change. However, for instance in the wind_speed column, there are salient changes when using tsclean, as evident when applying it in VAR.

4.4 Seasonality

From the initial plot I judge that there is seasonality. For a closer look at whether there are more granular periods of seasonality, I use seasonality plots. Before that, I aggregate the data weekly, monthly, and quarterly.

4.4.1 Seasonality Plots

# Aggregate the daily series to coarser frequencies for the seasonality
# plots below.
# NOTE(review): FUN = sum produces period TOTALS, not period means, even
# though the variable names say "mean temperature" — confirm this is
# intended (the seasonal shape is similar either way).

# Weekly mean temperature
xts_week_train_meantemp <- apply.weekly(xts_train_meantemp,sum)
ts_week_train_meantemp <-ts_ts(xts_week_train_meantemp)

# Monthly mean temperature
xts_mon_train_meantemp <- aggregate(xts_train_meantemp, by=as.yearmon, FUN=sum)
ts_mon_train_meantemp <-ts_ts(xts_mon_train_meantemp)

# Quarterly mean temperature
xts_quar_train_meantemp <- aggregate(xts_train_meantemp, as.yearqtr, FUN=sum)
ts_quar_train_meantemp <-ts_ts(xts_quar_train_meantemp)


# Yearly mean temperature
# Helper: map a date-like index to its integer year for aggregate().
as.year <- function(x) as.integer(as.yearmon(x))
xts_year_train_meantemp <- aggregate(xts_train_meantemp, by=as.year, FUN=sum)
#ts_year_train_meantemp <-ts_ts(xts_year_train_meantemp)
#xts_year_train_meantemp[1]

The year 2017 has only one observation, so it is removed from all the aggregated datasets. I couldn’t do it before aggregating, otherwise I would have confronted the error Error: series has no regular pattern.

xts_week_train_meantemp <- head(xts_week_train_meantemp, -1)
xts_mon_train_meantemp <- head(xts_mon_train_meantemp, -1)
xts_quar_train_meantemp <- head(xts_quar_train_meantemp, -1)

ts_week_train_meantemp <- head(ts_week_train_meantemp, -1)
ts_mon_train_meantemp <- head(ts_mon_train_meantemp, -1)
ts_quar_train_meantemp <- head(ts_quar_train_meantemp, -1)
#options(repr.plot.width = 7, repr.plot.height =20)
forecast::ggseasonplot(ts_mon_train_meantemp, year.labels=TRUE, year.labels.left=TRUE, labelgap = 0.1) +
  ylab("degree") +
  ggtitle("Seasonal plot: Monthly Mean Temperature")

forecast::ggseasonplot(ts_mon_train_meantemp, year.labels=TRUE, year.labels.left=TRUE, labelgap = 0.1, polar=TRUE) +
  ylab("degree") +
  ggtitle("Polar Seasonal plot: Monthly Mean Temperature")

#options(repr.plot.width = 7, repr.plot.height =20)
forecast::ggseasonplot(ts_quar_train_meantemp, year.labels=TRUE, year.labels.left=TRUE, labelgap = 0.1) +
  ylab("degree") +
  ggtitle("Seasonal plot: Quarterly Mean Temperature")

forecast::ggseasonplot(ts_quar_train_meantemp, year.labels=TRUE, year.labels.left=TRUE, labelgap = 0.1, polar=TRUE) +
  ylab("degree") +
  ggtitle("Polar Seasonal plot: Quarterly Mean Temperature")

Judging from the plots, all months seem to have seasonalities, with stronger ones in second and third quarters of each year.

4.4.2 Deseasonalize

If one intends to remove different periods of seasonality together, they can use the forecast::msts function. For instance, in the following, weekly and yearly seasonality are removed together.

des_ts_train_meantemp <- msts(xts_train_meantemp,seasonal.periods = c(7,365))
#head(des_xts_train)
#library(tsbox)
#ts_train <-ts_ts(xts_train)
#ts_train

class(des_ts_train_meantemp)
## [1] "msts" "ts"

The output of msts had a peculiar shape, and it does not use the state-of-the-art X13 decomposition. To address these limitations, I incorporated X-13ARIMA-SEATS using the seasonal::seas function. It has some limitations of its own, as stated in the package’s reference manual. For instance, the number of observations must not exceed 780, nor should the maximum seasonal period exceed 12. That is why I couldn’t use the original data ts_train or the weekly aggregated data ts_week_train, as I would confront the error Seasonal period too large. The only possible aggregated data with the highest feasible frequency was the monthly aggregated ts_mon_train. However, I am concerned that I would lose significant pattern and information with this amount of aggregation.

length(xts_train_meantemp)
## [1] 1462
length(ts_train_meantemp)
## [1] 1462
length(xts_train_meantemp)
## [1] 1462
nowXTS <-ts_xts(ts_train_meantemp)
length(nowXTS)
## [1] 1462
length(ts_week_train_meantemp)
## [1] 208
plot(ts_week_train_meantemp)

length(ts_week_train_meantemp)
## [1] 208
plot(ts_train_meantemp)

length(ts_train_meantemp)
## [1] 1462
plot(ts_mon_train_meantemp)

length(ts_mon_train_meantemp)
## [1] 48
m <- seas(ts_mon_train_meantemp)
ts_train_adj_meantemp <- final(m)
#ts_train_adj
length(ts_train_adj_meantemp)
## [1] 48
m <- seas(ts_mon_train_meantemp)
ts_train_adj_meantemp <- final(m)
#ts_train_adj
length(ts_train_adj_meantemp)
## [1] 48
plot(ts_train_adj_meantemp)

Plot original data along with trend and seasonally adjusted data

#ts_train
#series(m, "forecast.forecasts")
#out(m)
#seasadj(m)
autoplot(ts_mon_train_meantemp, series="Original Data") +
autolayer(trendcycle(m), series="Trend") +
autolayer(seasadj(m), series="Seasonally Adjusted") +
xlab("Year") + ylab("Mean Temperature") +
ggtitle("Mean Temperature Decomposed using X13") +
scale_colour_manual(values=c("gray","blue","red"),
           breaks=c("Original Data","Seasonally Adjusted","Trend"))

#ap < ggplotly(ap)

4.5 Detrend

In the seasonally adjusted time series ts_train_adj, a trend is salient. It can be removed by differencing as follows:

#ts_train_adj_meantemp |> log() |> nsdiffs(alpha=0.01) -> ts_train_adj_det_meantemp
ts_train_adj_meantemp |> log() |> diff() -> ts_train_adj_det_meantemp
plot(ts_train_adj_det_meantemp)

#plot(d)

4.6 Correlation Plots

In the following, the autocorrelation function (ACF) and partial autocorrelation function (PACF) are visualized for both unadjusted and adjusted versions of the time series.

  1. Weekly aggregated of original time series
# ACF/PACF of the weekly aggregated series. Arguments are spelled out in
# full: `lag` and `pl` relied on R's partial argument matching for
# `lag.max` and `plot`, which is fragile and flagged by lintr.
ggAcf(ts_week_train_meantemp, lag.max = 50)

pacf(ts_week_train_meantemp, lag.max = 50, plot = TRUE)

  1. Seasonally Adjusted
# ACF/PACF of the seasonally adjusted series. Arguments spelled out in
# full instead of relying on partial matching (`lag` -> `lag.max`,
# `pl` -> `plot`).
ggAcf(ts_train_adj_meantemp, lag.max = 10)

pacf(ts_train_adj_meantemp, lag.max = 10, plot = TRUE)

  1. Seasonally Adjusted and Detrended
# ACF/PACF of the seasonally adjusted and detrended series. Arguments
# spelled out in full instead of relying on partial matching
# (`lag` -> `lag.max`, `pl` -> `plot`).
ggAcf(ts_train_adj_det_meantemp, lag.max = 10)

pacf(ts_train_adj_det_meantemp, lag.max = 10, plot = TRUE)

4.7 Prepare Test Set

In section Preprocessing and Analysis Section, all the preprocessing steps are applied on the training dataset. In the following, same processes are applied on the test dataset.

summary(df_test)
##       date               meantemp        humidity       wind_speed    
##  Min.   :2017-01-01   Min.   :11.00   Min.   :17.75   Min.   : 1.387  
##  1st Qu.:2017-01-29   1st Qu.:16.44   1st Qu.:39.62   1st Qu.: 5.564  
##  Median :2017-02-26   Median :19.88   Median :57.75   Median : 8.069  
##  Mean   :2017-02-26   Mean   :21.71   Mean   :56.26   Mean   : 8.144  
##  3rd Qu.:2017-03-26   3rd Qu.:27.71   3rd Qu.:71.90   3rd Qu.:10.069  
##  Max.   :2017-04-24   Max.   :34.50   Max.   :95.83   Max.   :19.314  
##   meanpressure 
##  Min.   :  59  
##  1st Qu.:1007  
##  Median :1013  
##  Mean   :1004  
##  3rd Qu.:1017  
##  Max.   :1023
df_test |> describe()
## df_test 
## 
##  5  Variables      114  Observations
## --------------------------------------------------------------------------------
## date 
##          n    missing   distinct       Info       Mean        Gmd        .05 
##        114          0        114          1 2017-02-26      38.33 2017-01-06 
##        .10        .25        .50        .75        .90        .95 
## 2017-01-12 2017-01-29 2017-02-26 2017-03-26 2017-04-12 2017-04-18 
## 
## lowest : 2017-01-01 2017-01-02 2017-01-03 2017-01-04 2017-01-05
## highest: 2017-04-20 2017-04-21 2017-04-22 2017-04-23 2017-04-24
## --------------------------------------------------------------------------------
## meantemp 
##        n  missing distinct     Info     Mean      Gmd      .05      .10 
##      114        0      105        1    21.71    7.226    13.22    14.75 
##      .25      .50      .75      .90      .95 
##    16.44    19.88    27.71    31.00    32.67 
## 
## lowest : 11.00000 11.72222 11.78947 12.11111 13.04167
## highest: 32.90000 33.50000 34.00000 34.25000 34.50000
## --------------------------------------------------------------------------------
## humidity 
##        n  missing distinct     Info     Mean      Gmd      .05      .10 
##      114        0      109        1    56.26    21.97    26.74    29.49 
##      .25      .50      .75      .90      .95 
##    39.62    57.75    71.90    78.42    82.20 
## 
## lowest : 17.75000 19.42857 21.12500 24.12500 26.00000
## highest: 83.52632 84.44444 85.86957 91.64286 95.83333
## --------------------------------------------------------------------------------
## wind_speed 
##        n  missing distinct     Info     Mean      Gmd      .05      .10 
##      114        0      109        1    8.144    4.031    2.842    3.715 
##      .25      .50      .75      .90      .95 
##    5.564    8.069   10.069   13.464   14.353 
## 
## lowest :  1.387500  1.625000  1.854545  1.950000  2.100000
## highest: 14.384615 15.512500 16.662500 17.590000 19.314286
## --------------------------------------------------------------------------------
## meanpressure 
##        n  missing distinct     Info     Mean      Gmd      .05      .10 
##      114        0      109        1     1004    23.16     1002     1004 
##      .25      .50      .75      .90      .95 
##     1007     1013     1017     1019     1021 
## 
## lowest :   59.000  998.625  999.875 1000.875 1001.600
## highest: 1021.375 1021.556 1021.789 1021.958 1022.810
##                                   
## Value         60  1000  1010  1020
## Frequency      1    14    53    46
## Proportion 0.009 0.123 0.465 0.404
## 
## For the frequency table, variable is rounded to the nearest 10
## --------------------------------------------------------------------------------
xts_test_meantemp <- xts(df_test$meantemp, order.by=df_test$date, "%Y-%m-%d")
head(xts_test_meantemp)
##                [,1]
## 2017-01-01 15.91304
## 2017-01-02 18.50000
## 2017-01-03 17.11111
## 2017-01-04 18.70000
## 2017-01-05 18.38889
## 2017-01-06 19.31818
tail(xts_test_meantemp)
##              [,1]
## 2017-04-19 33.500
## 2017-04-20 34.500
## 2017-04-21 34.250
## 2017-04-22 32.900
## 2017-04-23 32.875
## 2017-04-24 32.000
ts_plot(xts_test_meantemp)

ts_test_meantemp <- ts_ts(xts_test_meantemp)
xts_week_test_meantemp <- apply.weekly(xts_test_meantemp,sum)
ts_week_test_meantemp <- na.remove(ts_ts(xts_week_test_meantemp))
#ts_week_test_meantemp <- as.ts(xts_week_test_meantemp)
length(ts_week_test_meantemp)
## [1] 18
ts_plot(xts_week_test_meantemp)

5 Testing Stationarity: Unit Root Tests

5.1 ADF

In the Augmented Dickey-Fuller (ADF) test, the null hypothesis is \(H_0\): there is a unit root (equivalently, the time series is non-stationary), while the alternate hypothesis is \(H_1\): the time series is stationary. The DF test is valid if the time series is well characterized by an \(AR(1)\) model with white-noise errors. The ADF test, unlike the DF test, can be applied to a larger class of time series models. For this reason, it was preferred in this work over the DF test.

  1. Original Time Series and its Weekly Adjusted
ts_train_meantemp |> adf.test()
## 
##  Augmented Dickey-Fuller Test
## 
## data:  ts_train_meantemp
## Dickey-Fuller = -1.9871, Lag order = 11, p-value = 0.5838
## alternative hypothesis: stationary
ts_week_train_meantemp |> adf.test()
## 
##  Augmented Dickey-Fuller Test
## 
## data:  ts_week_train_meantemp
## Dickey-Fuller = -3.8729, Lag order = 5, p-value = 0.01651
## alternative hypothesis: stationary
  1. Seasonally Adjusted
ts_train_adj_meantemp |> adf.test() 
## 
##  Augmented Dickey-Fuller Test
## 
## data:  ts_train_adj_meantemp
## Dickey-Fuller = -2.5897, Lag order = 3, p-value = 0.3386
## alternative hypothesis: stationary
  1. Seasonally Adjusted and Detrended
ts_train_adj_det_meantemp |> adf.test() 
## Warning in adf.test(ts_train_adj_det_meantemp): p-value smaller than printed p-
## value
## 
##  Augmented Dickey-Fuller Test
## 
## data:  ts_train_adj_det_meantemp
## Dickey-Fuller = -4.698, Lag order = 3, p-value = 0.01
## alternative hypothesis: stationary

In the ADF test, a more negative test statistic provides stronger evidence against the null hypothesis (time series is not stationary). If we set the threshold to be 0.05, p-values less than this value imply the null hypothesis is unlikely to be true, and we can increase our certainty in the alternate hypothesis (time series is stationary). For all the investigated datasets, results are reported in the following:

  1. Original time series: non-stationary
  2. Seasonally adjusted: non-stationary
  3. Seasonally adjusted and detrended: stationary
  4. Weekly aggregated time series: stationary

5.2 KPSS

In Kwiatkowski-Phillips-Schmidt-Shin (KPSS) test, the null hypothesis is \(H_0\): time series is stationary (level or trend), while the alternate hypothesis is \(H_1\): time series is non-stationary.

  1. Original Time Series and also its weekly aggregated
ts_train_meantemp |>  kpss.test()
## 
##  KPSS Test for Level Stationarity
## 
## data:  ts_train_meantemp
## KPSS Level = 0.58122, Truncation lag parameter = 7, p-value = 0.02434
ts_week_train_meantemp |>  kpss.test()
## Warning in kpss.test(ts_week_train_meantemp): p-value greater than printed p-
## value
## 
##  KPSS Test for Level Stationarity
## 
## data:  ts_week_train_meantemp
## KPSS Level = 0.16182, Truncation lag parameter = 4, p-value = 0.1
  1. Seasonally Adjusted
ts_train_adj_meantemp |>  kpss.test()
## Warning in kpss.test(ts_train_adj_meantemp): p-value smaller than printed p-
## value
## 
##  KPSS Test for Level Stationarity
## 
## data:  ts_train_adj_meantemp
## KPSS Level = 0.99735, Truncation lag parameter = 3, p-value = 0.01
  1. Seasonally Adjusted and Detrended
ts_train_adj_det_meantemp |> kpss.test()
## Warning in kpss.test(ts_train_adj_det_meantemp): p-value greater than printed p-
## value
## 
##  KPSS Test for Level Stationarity
## 
## data:  ts_train_adj_det_meantemp
## KPSS Level = 0.059493, Truncation lag parameter = 3, p-value = 0.1

For all the investigated datasets, results are reported in the following (recall that in KPSS the null hypothesis is stationarity, so p-values below 0.05 indicate non-stationarity):

  1. Original time series: non-stationary (p = 0.024)
  2. Seasonally adjusted: non-stationary (p = 0.01)
  3. Seasonally adjusted and detrended: stationary (p = 0.1)
  4. Weekly aggregated time series: stationary (p = 0.1)

6 Forecasting

6.1 SARIMA

The seasonal autoregressive integrated moving average (SARIMA) is used in the following to forecast time series. The auto.arima function is used to find the following parameters, in which (p, d, q) correspond to non-seasonal component of time series, whereas (P, D, Q) correspond to seasonal component.

  • p: Non-seasonal auto-regressive lag order
  • d: Order of first-differencing
  • q: Non-seasonal moving average lag order
  • P: Seasonal auto-regressive lag order
  • D: Order of seasonal-differencing
  • Q: Seasonal moving average lag order

The function returns the best ARIMA model according to an information criterion (AICc by default). Since the runtime of the function is very long, the best parameters found are stored, and then they are used as both the minimum and maximum option, although the auto.arima function nonetheless tries some initial fixed orders before using the predefined lags. Therefore, the options are reduced, but not to one combination.

  1. Forecast original time series of meantemp, as the original data has very high frequency, which makes it unsuitable for ARMA. For this case, I set seasonal=TRUE, as in subsequent cases I use data that I seasonally adjusted them already. Setting seasonal=TRUE makes the model more time-consuming.
# Fit a SARIMA model to the daily series with auto.arima.
# Differencing orders are fixed (d = 1, D = 1) and the non-seasonal
# (p, q) search is pinned to the previously found best orders
# (start.* == max.*) to keep run time manageable; seasonal (P, Q)
# orders are forced to 0. stepwise = TRUE still tries a few fixed
# starting models, so more than one combination is evaluated.
# NOTE(review): the series frequency is 365.2425, so the implied
# seasonal period is very large — this call can be extremely slow.
forecast_ts_train_meantemp <- auto.arima(ts_train_meantemp,
                            d = 1,
                            D = 1,
                            start.p = 2,
                            start.q = 3,
                            max.p = 2,
                            max.q = 3,
                            start.P = 0,
                            start.Q = 0,
                            max.P = 0,
                            max.Q = 0,
                            trace = TRUE, 
                            seasonal=TRUE,
                            stepwise=TRUE,
                            approximation=FALSE
                            #,xreg = xreg_matrix
                            )
## 
##  ARIMA(2,1,3)(0,1,0)[365]                    : 4789.495
##  ARIMA(0,1,0)(0,1,0)[365]                    : 4943.724
##  ARIMA(1,1,0)(0,1,0)[365]                    : 4926.887
##  ARIMA(0,1,1)(0,1,0)[365]                    : 4920.711
##  ARIMA(1,1,3)(0,1,0)[365]                    : 4789.51
##  ARIMA(2,1,2)(0,1,0)[365]                    : Inf
##  ARIMA(1,1,2)(0,1,0)[365]                    : 4790.218
## 
##  Best model: ARIMA(2,1,3)(0,1,0)[365]
checkresiduals(forecast_ts_train_meantemp)

## 
##  Ljung-Box test
## 
## data:  Residuals from ARIMA(2,1,3)(0,1,0)[365]
## Q* = 363.76, df = 287, p-value = 0.001427
## 
## Model df: 5.   Total lags used: 292
forecast_ts_train_meantemp
## Series: ts_train_meantemp 
## ARIMA(2,1,3)(0,1,0)[365] 
## 
## Coefficients:
##          ar1     ar2      ma1      ma2      ma3
##       0.0104  0.4362  -0.2760  -0.6134  -0.0888
## s.e.  0.3402  0.2577   0.3395   0.3585   0.0480
## 
## sigma^2 = 4.561:  log likelihood = -2388.71
## AIC=4789.42   AICc=4789.5   BIC=4819.41
  1. Forecast the original time series of meantemp aggregated weekly, since the original data has a very high frequency, which makes it unsuitable for ARMA. For this case, I set seasonal=TRUE; in case 3, I use data that I have already seasonally adjusted. Setting seasonal=TRUE makes the model more time-consuming to fit.
# Fit a seasonal ARIMA to the weekly-aggregated mean-temperature series.
# As above, the search space is pinned to the best orders found in a
# previous exhaustive run.  (Fixed: use `<-` for assignment, per R style.)
forecast_ts_week_train_meantemp <- auto.arima(
  ts_week_train_meantemp,
  d = 1, D = 1,              # one regular and one seasonal difference
  start.p = 4, max.p = 4,    # non-seasonal AR order fixed at 4
  start.q = 0, max.q = 0,    # no non-seasonal MA terms
  start.P = 1, max.P = 1,    # one seasonal AR term
  start.Q = 0, max.Q = 0,    # no seasonal MA terms
  seasonal = TRUE,
  stepwise = FALSE,          # exhaustive search (small space, affordable)
  approximation = FALSE,     # exact information criteria
  trace = TRUE
)
## 
##  ARIMA(0,1,0)(0,1,0)[52]                    : 1348.451
##  ARIMA(0,1,0)(1,1,0)[52]                    : 1317.269
##  ARIMA(1,1,0)(0,1,0)[52]                    : 1333.282
##  ARIMA(1,1,0)(1,1,0)[52]                    : 1303.962
##  ARIMA(2,1,0)(0,1,0)[52]                    : 1325.56
##  ARIMA(2,1,0)(1,1,0)[52]                    : 1295.714
##  ARIMA(3,1,0)(0,1,0)[52]                    : 1317.794
##  ARIMA(3,1,0)(1,1,0)[52]                    : 1286.975
##  ARIMA(4,1,0)(0,1,0)[52]                    : 1299.762
##  ARIMA(4,1,0)(1,1,0)[52]                    : 1269.65
## 
## 
## 
##  Best model: ARIMA(4,1,0)(1,1,0)[52]
checkresiduals(forecast_ts_week_train_meantemp)

## 
##  Ljung-Box test
## 
## data:  Residuals from ARIMA(4,1,0)(1,1,0)[52]
## Q* = 33.201, df = 37, p-value = 0.6478
## 
## Model df: 5.   Total lags used: 42
forecast_ts_week_train_meantemp
## Series: ts_week_train_meantemp 
## ARIMA(4,1,0)(1,1,0)[52] 
## 
## Coefficients:
##           ar1      ar2      ar3      ar4     sar1
##       -0.5755  -0.5066  -0.4439  -0.3623  -0.5049
## s.e.   0.0786   0.0849   0.0844   0.0792   0.0746
## 
## sigma^2 = 181.4:  log likelihood = -628.54
## AIC=1269.08   AICc=1269.65   BIC=1287.34
  1. Forecast deseasonalized time series
# Fit a non-seasonal ARIMA to the deseasonalized training series.
# Best orders from a previous run: ARIMA(0,1,1).
# (Fixed: use `<-` for assignment, per R style.)
forecast_ts_train_adj_meantemp <- auto.arima(
  ts_train_adj_meantemp,
  d = 1,                     # one regular difference
  start.p = 0, max.p = 0,    # no AR terms
  start.q = 1, max.q = 1,    # one MA term
  seasonal = FALSE,          # series is already seasonally adjusted
  stepwise = FALSE,
  approximation = FALSE,
  trace = TRUE
)
## 
##  ARIMA(0,1,0)                               : 441.3883
##  ARIMA(0,1,0)            with drift         : 443.1154
##  ARIMA(0,1,1)                               : 429.9577
##  ARIMA(0,1,1)            with drift         : 429.5371
## 
## 
## 
##  Best model: ARIMA(0,1,1)            with drift
checkresiduals(forecast_ts_train_adj_meantemp)

## 
##  Ljung-Box test
## 
## data:  Residuals from ARIMA(0,1,1) with drift
## Q* = 7.4191, df = 9, p-value = 0.5936
## 
## Model df: 1.   Total lags used: 10
forecast_ts_train_adj_meantemp
## Series: ts_train_adj_meantemp 
## ARIMA(0,1,1) with drift 
## 
## Coefficients:
##           ma1   drift
##       -0.6827  1.9880
## s.e.   0.1279  1.0526
## 
## sigma^2 = 488.7:  log likelihood = -211.49
## AIC=428.98   AICc=429.54   BIC=434.53
  1. Forecast deseasonalized and detrended time series
# Fit a non-seasonal ARMA to the deseasonalized AND detrended series.
# Best orders from a previous run: ARIMA(0,0,1).
# (Fixed: use `<-` for assignment, per R style.)
forecast_ts_train_adj_det_meantemp <- auto.arima(
  ts_train_adj_det_meantemp,
  d = 0,                     # already detrended, so no differencing
  start.p = 0, max.p = 0,    # no AR terms
  start.q = 1, max.q = 1,    # one MA term
  seasonal = FALSE,          # series is already seasonally adjusted
  stepwise = FALSE,
  approximation = FALSE,
  trace = TRUE
)
## 
##  ARIMA(0,0,0)            with zero mean     : -183.1965
##  ARIMA(0,0,0)            with non-zero mean : -181.4467
##  ARIMA(0,0,1)            with zero mean     : -195.2457
##  ARIMA(0,0,1)            with non-zero mean : -195.724
## 
## 
## 
##  Best model: ARIMA(0,0,1)            with non-zero mean
checkresiduals(forecast_ts_train_adj_det_meantemp)

## 
##  Ljung-Box test
## 
## data:  Residuals from ARIMA(0,0,1) with non-zero mean
## Q* = 7.1212, df = 8, p-value = 0.5236
## 
## Model df: 1.   Total lags used: 9
#checkresiduals(forecast_ts_train_meantemp)
forecast_ts_train_adj_det_meantemp
## Series: ts_train_adj_det_meantemp 
## ARIMA(0,0,1) with non-zero mean 
## 
## Coefficients:
##           ma1    mean
##       -0.6984  0.0025
## s.e.   0.1247  0.0013
## 
## sigma^2 = 0.0008149:  log likelihood = 101.14
## AIC=-196.28   AICc=-195.72   BIC=-190.73

6.1.1 Evaluate

Based on the results from the forecast of original data (case 1), we have:

# In-sample information criteria of the selected ARIMA model (case 1).
AIC_ARMA <- AIC(forecast_ts_train_meantemp)
AIC_ARMA
## [1] 4789.418
BIC_ARMA <- BIC(forecast_ts_train_meantemp)
BIC_ARMA
## [1] 4819.414

The following evaluation metrics between prediction and test data are manually computed: RMSE, MAE, \(R^2\) score.

# Forecast 114 steps ahead (the length of the daily test set) and compare
# against the held-out observations.
# NOTE(review): the variable name `forecast` shadows forecast::forecast();
# later calls still resolve to the function, but renaming would be safer.
forecast <- forecast_ts_train_meantemp |> forecast(h=114)
#forecast
predicted <- as.numeric(forecast$mean)
actual <- as.numeric(ts_test_meantemp)
# Root mean squared error between prediction and test data.
RMSE_ARMA <- rmse(predicted, actual)
RMSE_ARMA
## [1] 4.298832
# Mean absolute error between prediction and test data.
MAE_ARMA <- mae(predicted, actual)
MAE_ARMA
## [1] 3.575725
# R^2 as the squared Pearson correlation between the two series.
rsq <- function (x, y) cor(x, y) ^ 2

RSQ_ARMA <- rsq(actual, predicted)

RSQ_ARMA
## [1] 0.8174112

Two tables will be presented, one of which reporting metrics of the model applied on training set, and the other reporting metrics for evaluating predictions based on test set.

# Table of in-sample information criteria, rounded to 4 decimal places.
train_metrics <- cbind(AIC = AIC_ARMA, BIC = BIC_ARMA)
knitr::kable(train_metrics, digits = 4)
AIC BIC
4789.418 4819.414
# Table of test-set evaluation metrics, rounded to 4 decimal places.
test_metrics <- cbind(R2 = RSQ_ARMA, RMSE = RMSE_ARMA, MAE = MAE_ARMA)
knitr::kable(test_metrics, digits = 4)
R2 RMSE MAE
0.8174 4.2988 3.5757

6.1.2 Plot Forecast

  1. Original time series of meantemp
autoplot(forecast(forecast_ts_train_meantemp))# + autolayer(xts_test_meantemp)

length(ts_test_meantemp)
## [1] 114
forecast_ts_train_meantemp |> forecast(h=114) |>
autoplot() + autolayer(ts_test_meantemp)

  1. Original time series of meantemp but aggregated weekly
#autoplot(forecast(ts_week_train_meantemp)) #+ autolayer(ts_week_test_meantemp)
  1. Deseasonalized time series
#forecast_ts_train_adj + ts_train_adj
autoplot(forecast(forecast_ts_train_adj_meantemp))

  1. Deseasonalized and detrended time series
autoplot(forecast(forecast_ts_train_adj_det_meantemp))

The plot of forecasting the test data (using forecast from case 1) joint with test data is displayed in the following:

#ts_plot(ts_test_meantemp, forecast$mean)
#ts.union(ts_test_meantemp, forecast$mean)
#forecast$mean

# Wrap the test series and the ARMA prediction as xts objects indexed by
# the test dates, then plot both together.
# NOTE(review): the third positional argument of xts() is `frequency`, not a
# date format — "%Y-%m-%d" is likely not doing what was intended; confirm.
xts_temp <- xts(ts_test_meantemp, order.by=df_test$date, "%Y-%m-%d")
xts_temp_2 <- xts(forecast$mean, order.by=df_test$date, "%Y-%m-%d")
#xts_temp
#xts_temp_2
ts_plot(xts_temp, xts_temp_2)

I encountered some issues when adding the seasonality and trend components of the adjusted versions back to the original time series. Nevertheless, judging by the results, the processed original data (without deseasonalisation and without detrending) is a proper input to the ARMA model, as the function handles seasonality by itself, and the results demonstrate a promising forecasting performance. Therefore, henceforth the processed time series without adjustment are used for the subsequent forecasting parts of this work.

6.2 Vector autoregressive (VAR)

The vector autoregressive (VAR) model is applied to the multivariate time series, using both the meantemp and wind_speed columns.

The wind_speed is plotted interactively in the following:

# Interactive area plot of the wind-speed training series.
p2 <- df_train |>
  ggplot(aes(x = date, y = wind_speed)) +
    geom_area(fill = "#69b3a2", alpha = 0.5) +
    geom_line(color = "#69b3a2") +
    # Fixed: the axis label previously read "bitcoin price ($)" — a leftover
    # from the template this plot was adapted from.
    ylab("wind speed") +
    theme_ipsum()

# Turn it interactive with ggplotly
p2 <- ggplotly(p2)
#p
p2

Same as Construct Time Series, a time series object is constructed from the wind_speed column.

#xts_train_meantemp <- xts(df_train$meantemp, order.by=df_train$date, "%Y-%m-%d")
#ts_train_meantemp <-ts_ts(xts_train_meantemp)

xts_train_windspeed <- xts(df_train$wind_speed, order.by=df_train$date, "%Y-%m-%d")
ts_train_windspeed <-ts_ts(xts_train_windspeed)

A static plot of time series is provided below:

ts_plot(ts_train_windspeed)

The outliers of time series are treated automatically, same as Treat Outliers: Automatic.

xts_train_windspeed <- tsclean(xts_train_windspeed)

The plot of the resulted time series is visualized below:

ts_plot(xts_train_windspeed)

The test data for wind_speed is created and processed using the same procedure followed for train data:

xts_test_windspeed <- xts(df_test$wind_speed, order.by=df_test$date, "%Y-%m-%d")
head(xts_test_windspeed)
##                [,1]
## 2017-01-01 2.743478
## 2017-01-02 2.894444
## 2017-01-03 4.016667
## 2017-01-04 4.545000
## 2017-01-05 3.300000
## 2017-01-06 8.681818
tail(xts_test_windspeed)
##                [,1]
## 2017-04-19  9.02500
## 2017-04-20  5.56250
## 2017-04-21  6.96250
## 2017-04-22  8.89000
## 2017-04-23  9.96250
## 2017-04-24 12.15714
ts_plot(xts_test_windspeed)

xts_test_windspeed <- tsclean(xts_test_windspeed)
ts_test_windspeed <- ts_ts(xts_test_windspeed)
ts_plot(xts_test_windspeed)

In what follows, interactive plot of both time series are illustrated:

fig <- plot_ly(df_train, type = 'scatter', mode = 'lines')%>%
  add_trace(x = ~date, y = ~meantemp, name = 'MeanTemp')%>%
  add_trace(x = ~date, y = ~wind_speed, name = 'WindSpeed')%>%
  layout(title = 'custom tick labels',legend=list(title=list(text='variable')),
         xaxis = list(dtick = "M1", tickformat= "%b\n%Y"), width = 2000)
## Warning: Specifying width/height in layout() is now deprecated.
## Please specify in ggplotly() or plot_ly()
# NOTE(review): options(warn = -1) suppresses ALL warnings globally for the
# rest of the session, not just for this chunk — consider
# suppressWarnings() around the specific call instead.
options(warn = -1)
# Style the combined interactive plot: white zero lines/grid, light
# background.
fig <- fig %>%
  layout(
         xaxis = list(zerolinecolor = '#ffff',
                      zerolinewidth = 2,
                      gridcolor = 'ffff',  tickangle = 0),
         yaxis = list(zerolinecolor = '#ffff',
                      zerolinewidth = 2,
                      gridcolor = 'ffff'),
         plot_bgcolor='#e5ecf6')


fig

Weekly aggregated time series are also constructed:

# Weekly wind speed: aggregate the daily series to weekly sums for both the
# training and the test split.  (The previous comment said "mean
# temperature" — this chunk handles wind_speed.)
xts_week_train_windspeed <- apply.weekly(xts_train_windspeed, sum)
ts_week_train_windspeed <- ts_ts(xts_week_train_windspeed)

xts_week_test_windspeed <- apply.weekly(xts_test_windspeed, sum)
ts_week_test_windspeed <- na.remove(ts_ts(xts_week_test_windspeed))
#ts_week_test_windspeed <- as.ts(xts_week_test_windspeed)
# NOTE(review): this prints the meantemp test series, not the wind-speed one
# just built — confirm whether ts_week_test_windspeed was intended.
ts_week_test_meantemp
## Time Series:
## Start = 2017 
## End = 2017.30938349179 
## Frequency = 54.9479867256584 
##  [1]  15.91304 122.41073  92.34209 103.12740 120.67435 117.87830 109.63056
##  [8] 135.81840 139.83333 150.79487 140.80200 149.57842 188.10000 211.42460
## [15] 201.63632 208.74603 234.58056  32.00000
## attr(,"na.removed")
##  [1]   2   3   4   5   6   7   9  10  11  12  13  14  16  17  18  19  20  21  23
## [20]  24  25  26  27  28  30  31  32  33  34  35  37  38  39  40  41  42  44  45
## [39]  46  47  48  49  51  52  53  54  55  56  58  59  60  61  62  63  65  66  67
## [58]  68  69  70  72  73  74  75  76  77  79  80  81  82  83  84  86  87  88  89
## [77]  90  91  93  94  95  96  97  98 100 101 102 103 104 105 107 108 109 110 111
## [96] 112

Both original data and weekly aggregated one are visualized in the following:

ts_plot(ts_week_train_windspeed)

ts_plot(xts_train_windspeed)

Initially, the original time series was fed to the model, yet it yielded poor performance, which I speculate to be attributable to the many fluctuations and seasonality components. On the other hand, using the weekly aggregated data leads to a significant improvement.

Both time series are merged and then fed to the VAR model. NA values are also removed from the merged time series.

# Merge the weekly meantemp and wind_speed series into one multivariate
# time series for the VAR model, then drop rows with missing values.
#VAR_data <- ts.union(ts_train_meantemp, ts_train_windspeed)
VAR_data <- ts.union(ts_week_train_meantemp, ts_week_train_windspeed)
# Fixed: column names are a plain character vector — use c(), not cbind()
# (cbind() builds a 1-row character matrix, which only works by coercion).
colnames(VAR_data) <- c("meantemp", "wind_speed")
#v1 <- cbind(ts_week_train_meantemp, ts_week_train_windspeed)
#colnames(v1) <- cbind("meantemp","wind_speed")
#lagselect <- VARselect(v1, type = "both")
#lagselect$selection
VAR_data <- na.remove(VAR_data)
#tail(v1)

We look at different lags suggested by different criteria if we use VAR model.

# Compare VAR lag orders suggested by the AIC/HQ/SC/FPE criteria, with both
# constant and trend included.
# NOTE(review): season = 12 looks like a monthly-data default; the input
# here is weekly — confirm the intended seasonal dummy period.
lagselect <- VARselect(VAR_data, season=12, type = "both")
lagselect$selection
## AIC(n)  HQ(n)  SC(n) FPE(n) 
##     10      6      1     10
lagselect$criteria
##                  1           2           3           4           5           6
## AIC(n)    10.86018    10.81179    10.74853    10.72197    10.70787    10.63106
## HQ(n)     11.06185    11.04034    11.00397    11.00430    11.01708    10.96716
## SC(n)     11.35841    11.37644    11.37961    11.41948    11.47181    11.46143
## FPE(n) 52091.96344 49644.23391 46616.68238 45413.78546 44800.44647 41513.24904
##                  7           8           9          10
## AIC(n)    10.61618    10.62058    10.63627    10.60996
## HQ(n)     10.97918    11.01047    11.05304    11.05362
## SC(n)     11.51298    11.58381    11.66592    11.70605
## FPE(n) 40929.31852 41143.70844 41833.75446 40791.90935

Now that we have merged the column meantemp with wind_speed, we use VAR models with lag to be 10.

# Estimate the VAR with p = 10 lags (the order favoured by AIC/FPE above).
# NOTE(review): season = 8 here differs from the season = 12 used in
# VARselect() — confirm which seasonal dummy period is intended.
VAR_est <- VAR(y = VAR_data, season=8, type="both", p=10)
VAR_est
## 
## VAR Estimation Results:
## ======================= 
## 
## Estimated coefficients for equation meantemp: 
## ============================================= 
## Call:
## meantemp = meantemp.l1 + wind_speed.l1 + meantemp.l2 + wind_speed.l2 + meantemp.l3 + wind_speed.l3 + meantemp.l4 + wind_speed.l4 + meantemp.l5 + wind_speed.l5 + meantemp.l6 + wind_speed.l6 + meantemp.l7 + wind_speed.l7 + meantemp.l8 + wind_speed.l8 + meantemp.l9 + wind_speed.l9 + meantemp.l10 + wind_speed.l10 + const + trend + sd1 + sd2 + sd3 + sd4 + sd5 + sd6 + sd7 
## 
##    meantemp.l1  wind_speed.l1    meantemp.l2  wind_speed.l2    meantemp.l3 
##    0.689509677   -0.036305048    0.122053939    0.130967121    0.049179176 
##  wind_speed.l3    meantemp.l4  wind_speed.l4    meantemp.l5  wind_speed.l5 
##    0.046573897   -0.002112818    0.110696976    0.238902095   -0.050100942 
##    meantemp.l6  wind_speed.l6    meantemp.l7  wind_speed.l7    meantemp.l8 
##   -0.146334371    0.144786326   -0.113654255   -0.002846842   -0.148801275 
##  wind_speed.l8    meantemp.l9  wind_speed.l9   meantemp.l10 wind_speed.l10 
##    0.149206407    0.074555275   -0.030718020   -0.049045011    0.219351729 
##          const          trend            sd1            sd2            sd3 
##   17.370795528    0.021320033    8.139625406    0.391065418    5.383036420 
##            sd4            sd5            sd6            sd7 
##    1.677946665   10.140458250    0.772150182    0.358336901 
## 
## 
## Estimated coefficients for equation wind_speed: 
## =============================================== 
## Call:
## wind_speed = meantemp.l1 + wind_speed.l1 + meantemp.l2 + wind_speed.l2 + meantemp.l3 + wind_speed.l3 + meantemp.l4 + wind_speed.l4 + meantemp.l5 + wind_speed.l5 + meantemp.l6 + wind_speed.l6 + meantemp.l7 + wind_speed.l7 + meantemp.l8 + wind_speed.l8 + meantemp.l9 + wind_speed.l9 + meantemp.l10 + wind_speed.l10 + const + trend + sd1 + sd2 + sd3 + sd4 + sd5 + sd6 + sd7 
## 
##    meantemp.l1  wind_speed.l1    meantemp.l2  wind_speed.l2    meantemp.l3 
##    0.069024744    0.184304838    0.131154305   -0.004551666   -0.013554779 
##  wind_speed.l3    meantemp.l4  wind_speed.l4    meantemp.l5  wind_speed.l5 
##    0.128031917   -0.130028419    0.052890178    0.169230094    0.163426050 
##    meantemp.l6  wind_speed.l6    meantemp.l7  wind_speed.l7    meantemp.l8 
##   -0.075300255    0.027147295   -0.251302983   -0.079158189    0.040539603 
##  wind_speed.l8    meantemp.l9  wind_speed.l9   meantemp.l10 wind_speed.l10 
##   -0.037406248    0.047408589    0.141711053   -0.037567017    0.127941422 
##          const          trend            sd1            sd2            sd3 
##   20.605673892    0.019403293   -7.616951554   -0.788899882   -7.446507564 
##            sd4            sd5            sd6            sd7 
##    1.948843755   -0.822851441   -6.381095605   -8.345748018
summary(VAR_est)
## 
## VAR Estimation Results:
## ========================= 
## Endogenous variables: meantemp, wind_speed 
## Deterministic variables: both 
## Sample size: 198 
## Log Likelihood: -1549.651 
## Roots of the characteristic polynomial:
## 0.9788 0.9788 0.9048 0.9048 0.877 0.877 0.8366 0.8366 0.8299 0.8299 0.8002 0.8002 0.7419 0.7419 0.6714 0.654 0.654 0.5891 0.5891 0.1924
## Call:
## VAR(y = VAR_data, p = 10, type = "both", season = 8L)
## 
## 
## Estimation results for equation meantemp: 
## ========================================= 
## meantemp = meantemp.l1 + wind_speed.l1 + meantemp.l2 + wind_speed.l2 + meantemp.l3 + wind_speed.l3 + meantemp.l4 + wind_speed.l4 + meantemp.l5 + wind_speed.l5 + meantemp.l6 + wind_speed.l6 + meantemp.l7 + wind_speed.l7 + meantemp.l8 + wind_speed.l8 + meantemp.l9 + wind_speed.l9 + meantemp.l10 + wind_speed.l10 + const + trend + sd1 + sd2 + sd3 + sd4 + sd5 + sd6 + sd7 
## 
##                 Estimate Std. Error t value Pr(>|t|)    
## meantemp.l1     0.689510   0.078189   8.818 1.38e-15 ***
## wind_speed.l1  -0.036305   0.067738  -0.536 0.592687    
## meantemp.l2     0.122054   0.093322   1.308 0.192691    
## wind_speed.l2   0.130967   0.066944   1.956 0.052068 .  
## meantemp.l3     0.049179   0.093036   0.529 0.597774    
## wind_speed.l3   0.046574   0.066730   0.698 0.486168    
## meantemp.l4    -0.002113   0.092811  -0.023 0.981865    
## wind_speed.l4   0.110697   0.066391   1.667 0.097299 .  
## meantemp.l5     0.238902   0.092815   2.574 0.010912 *  
## wind_speed.l5  -0.050101   0.066979  -0.748 0.455495    
## meantemp.l6    -0.146334   0.091951  -1.591 0.113380    
## wind_speed.l6   0.144786   0.066433   2.179 0.030683 *  
## meantemp.l7    -0.113654   0.091656  -1.240 0.216691    
## wind_speed.l7  -0.002847   0.066785  -0.043 0.966049    
## meantemp.l8    -0.148801   0.092809  -1.603 0.110736    
## wind_speed.l8   0.149206   0.066382   2.248 0.025890 *  
## meantemp.l9     0.074555   0.091103   0.818 0.414301    
## wind_speed.l9  -0.030718   0.066459  -0.462 0.644525    
## meantemp.l10   -0.049045   0.072711  -0.675 0.500904    
## wind_speed.l10  0.219352   0.067240   3.262 0.001337 ** 
## const          17.370796   4.941341   3.515 0.000564 ***
## trend           0.021320   0.016078   1.326 0.186623    
## sd1             8.139625   3.773365   2.157 0.032409 *  
## sd2             0.391065   3.684293   0.106 0.915594    
## sd3             5.383036   3.758591   1.432 0.153935    
## sd4             1.677947   3.559751   0.471 0.637987    
## sd5            10.140458   3.762532   2.695 0.007747 ** 
## sd6             0.772150   3.686816   0.209 0.834361    
## sd7             0.358337   3.714385   0.096 0.923259    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## 
## Residual standard error: 12.26 on 169 degrees of freedom
## Multiple R-Squared: 0.9453,  Adjusted R-squared: 0.9362 
## F-statistic: 104.2 on 28 and 169 DF,  p-value: < 2.2e-16 
## 
## 
## Estimation results for equation wind_speed: 
## =========================================== 
## wind_speed = meantemp.l1 + wind_speed.l1 + meantemp.l2 + wind_speed.l2 + meantemp.l3 + wind_speed.l3 + meantemp.l4 + wind_speed.l4 + meantemp.l5 + wind_speed.l5 + meantemp.l6 + wind_speed.l6 + meantemp.l7 + wind_speed.l7 + meantemp.l8 + wind_speed.l8 + meantemp.l9 + wind_speed.l9 + meantemp.l10 + wind_speed.l10 + const + trend + sd1 + sd2 + sd3 + sd4 + sd5 + sd6 + sd7 
## 
##                 Estimate Std. Error t value Pr(>|t|)    
## meantemp.l1     0.069025   0.092992   0.742 0.458956    
## wind_speed.l1   0.184305   0.080562   2.288 0.023391 *  
## meantemp.l2     0.131154   0.110990   1.182 0.238993    
## wind_speed.l2  -0.004552   0.079617  -0.057 0.954478    
## meantemp.l3    -0.013555   0.110649  -0.123 0.902647    
## wind_speed.l3   0.128032   0.079363   1.613 0.108556    
## meantemp.l4    -0.130028   0.110382  -1.178 0.240458    
## wind_speed.l4   0.052890   0.078960   0.670 0.503879    
## meantemp.l5     0.169230   0.110387   1.533 0.127130    
## wind_speed.l5   0.163426   0.079660   2.052 0.041756 *  
## meantemp.l6    -0.075300   0.109359  -0.689 0.492044    
## wind_speed.l6   0.027147   0.079010   0.344 0.731579    
## meantemp.l7    -0.251303   0.109008  -2.305 0.022362 *  
## wind_speed.l7  -0.079158   0.079429  -0.997 0.320386    
## meantemp.l8     0.040540   0.110380   0.367 0.713874    
## wind_speed.l8  -0.037406   0.078950  -0.474 0.636255    
## meantemp.l9     0.047409   0.108350   0.438 0.662271    
## wind_speed.l9   0.141711   0.079041   1.793 0.074780 .  
## meantemp.l10   -0.037567   0.086477  -0.434 0.664540    
## wind_speed.l10  0.127941   0.079970   1.600 0.111495    
## const          20.605674   5.876827   3.506 0.000582 ***
## trend           0.019403   0.019122   1.015 0.311698    
## sd1            -7.616952   4.487732  -1.697 0.091484 .  
## sd2            -0.788900   4.381796  -0.180 0.857337    
## sd3            -7.446508   4.470160  -1.666 0.097601 .  
## sd4             1.948844   4.233676   0.460 0.645879    
## sd5            -0.822851   4.474847  -0.184 0.854325    
## sd6            -6.381096   4.384797  -1.455 0.147448    
## sd7            -8.345748   4.417585  -1.889 0.060576 .  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## 
## Residual standard error: 14.58 on 169 degrees of freedom
## Multiple R-Squared: 0.5074,  Adjusted R-squared: 0.4258 
## F-statistic: 6.217 on 28 and 169 DF,  p-value: 8.952e-15 
## 
## 
## 
## Covariance matrix of residuals:
##            meantemp wind_speed
## meantemp     150.37      49.25
## wind_speed    49.25     212.69
## 
## Correlation matrix of residuals:
##            meantemp wind_speed
## meantemp     1.0000     0.2754
## wind_speed   0.2754     1.0000
summary(VAR_est$varresult)
##            Length Class Mode
## meantemp   12     lm    list
## wind_speed 12     lm    list

6.2.1 Evaluate

Based on the model summary, the value of metrics obtained by the model can be observed, for lag 10:

lagselect$criteria[,10]
##      AIC(n)       HQ(n)       SC(n)      FPE(n) 
##    10.60996    11.05362    11.70605 40791.90935

The \(R^2\) score of the model after applying can also be reported for both of the time series used:

# Adjusted R^2 of each equation of the fitted VAR (training fit).
VAR_meantemp_adjr <- summary(VAR_est$varresult$meantemp)$adj.r.squared
VAR_meantemp_adjr
## [1] 0.9361864
VAR_windspeed_adjr <- summary(VAR_est$varresult$wind_speed)$adj.r.squared
VAR_windspeed_adjr
## [1] 0.4258064

A Portmanteau test is provided to test that the residuals are uncorrelated.

serial.test(VAR_est, lags.pt=10, type="PT.asymptotic")
## 
##  Portmanteau Test (asymptotic)
## 
## data:  Residuals of VAR object VAR_est
## Chi-squared = 7.3153, df = 0, p-value < 2.2e-16
# Forecast 18 weeks ahead — the length of the weekly test set.
# Fixed: predict.varest() takes `n.ahead`, not `h`; the previous `h = 114`
# was silently absorbed by `...`, so the default n.ahead = 10 was used.
forecasts <- predict(VAR_est, n.ahead = 18)
# NOTE(review): `forecast` shadows forecast::forecast(); consider renaming.
forecast <- VAR_est |> forecast(h=18)

Now we use test data to evaluate predictions:

# Extract the VAR point forecasts for each variable and align them with the
# weekly test series, then compute RMSE, MAE and R^2 per variable.
predicted_meantemp <- as.numeric(forecast[2]$forecast$meantemp$mean)
actual_meantemp <- as.numeric(ts_week_test_meantemp)

predicted_windspeed <- as.numeric(forecast[2]$forecast$wind_speed$mean)
# NOTE(review): variable name is misspelled ("winspeed"); kept as-is because
# later lines in this chunk reference it.
actual_winspeed <- as.numeric(ts_week_test_windspeed)
# Root mean squared errors.
RMSE_meantemp_VAR <- rmse(predicted_meantemp, actual_meantemp)
RMSE_meantemp_VAR
## [1] 73.53212
RMSE_windspeed_VAR <- rmse(predicted_windspeed, actual_winspeed)
RMSE_windspeed_VAR
## [1] 19.90842
# Mean absolute errors.
MAE_meantemp_VAR <- mae(predicted_meantemp, actual_meantemp)
MAE_meantemp_VAR
## [1] 57.97236
MAE_windspeed_VAR <- mae(predicted_windspeed, actual_winspeed)
MAE_windspeed_VAR
## [1] 14.47499
# R^2 as squared Pearson correlation (same helper as defined earlier).
rsq <- function (x, y) cor(x, y) ^ 2
RSQ_meantemp_VAR <- rsq(predicted_meantemp, actual_meantemp)
RSQ_meantemp_VAR
## [1] 0.3756512
RSQ_windspeed_VAR <- rsq(predicted_windspeed, actual_winspeed)
RSQ_windspeed_VAR
## [1] 0.1374916

6.2.1.1 meantemp

# Training-fit summary for meantemp: adjusted R^2 and the AIC at lag 10.
# Fixed: `[0]` is an empty selection in 1-based R, which silently dropped
# the AIC column from the rendered table; select the named entry instead.
d <- cbind(Adjusted_R2 = VAR_meantemp_adjr, AIC = lagselect$criteria[,10]["AIC(n)"])
# at most 4 decimal places
knitr::kable(d, digits = 4)
Adjusted_R2
0.9362
# Test-set metrics for the meantemp equation, rounded to 4 decimal places.
meantemp_test_metrics <- cbind(
  R2 = RSQ_meantemp_VAR,
  RMSE = RMSE_meantemp_VAR,
  MAE = MAE_meantemp_VAR
)
knitr::kable(meantemp_test_metrics, digits = 4)
R2 RMSE MAE
0.3757 73.5321 57.9724

6.2.1.2 windspeed

# Training-fit summary for wind_speed: adjusted R^2 and the AIC at lag 10.
# Fixed two copy-paste bugs: this section previously reported
# VAR_meantemp_adjr (the meantemp fit) instead of VAR_windspeed_adjr, and
# `[0]` (an empty selection) silently dropped the AIC column.
d <- cbind(Adjusted_R2 = VAR_windspeed_adjr, AIC = lagselect$criteria[,10]["AIC(n)"])
# at most 4 decimal places
knitr::kable(d, digits = 4)
Adjusted_R2
0.9362
# Test-set metrics for the wind_speed equation, rounded to 4 decimal places.
windspeed_test_metrics <- cbind(
  R2 = RSQ_windspeed_VAR,
  RMSE = RMSE_windspeed_VAR,
  MAE = MAE_windspeed_VAR
)
knitr::kable(windspeed_test_metrics, digits = 4)
R2 RMSE MAE
0.1375 19.9084 14.475

6.2.2 Plot Forecast

Firstly, plot of forecasts are displayed, which are based on the model being trained on the training data.

plot(forecasts)

forecast[2]$forecast$meantemp |> autoplot() + autolayer(ts_week_test_meantemp)

forecast[2]$forecast$wind_speed |>  autoplot() + autolayer(ts_week_test_windspeed)

Secondly, the prediction of the test data alongside the actual test data is visualized:

ts_plot(forecast[2]$forecast$meantemp$mean, ts_week_test_meantemp)

ts_plot(forecast[2]$forecast$wind_speed$mean, ts_week_test_windspeed)

6.2.3 Granger Causality

Granger_meantemp <- causality(VAR_est, cause = "meantemp")
Granger_meantemp
## $Granger
## 
##  Granger causality H0: meantemp do not Granger-cause wind_speed
## 
## data:  VAR object VAR_est
## F-Test = 3.0682, df1 = 10, df2 = 338, p-value = 0.0009546
## 
## 
## $Instant
## 
##  H0: No instantaneous causality between: meantemp and wind_speed
## 
## data:  VAR object VAR_est
## Chi-squared = 13.96, df = 1, p-value = 0.0001867
Granger_windspeed <- causality(VAR_est, cause = "wind_speed")
Granger_windspeed
## $Granger
## 
##  Granger causality H0: wind_speed do not Granger-cause meantemp
## 
## data:  VAR object VAR_est
## F-Test = 2.5126, df1 = 10, df2 = 338, p-value = 0.006333
## 
## 
## $Instant
## 
##  H0: No instantaneous causality between: wind_speed and meantemp
## 
## data:  VAR object VAR_est
## Chi-squared = 13.96, df = 1, p-value = 0.0001867

6.2.4 Forecast Error Variance Decomposition (FEVD)

FEVD1 <- fevd(VAR_est, n.ahead = 50)
FEVD1
## $meantemp
##        meantemp  wind_speed
##  [1,] 1.0000000 0.000000000
##  [2,] 0.9988206 0.001179378
##  [3,] 0.9921833 0.007816717
##  [4,] 0.9830339 0.016966062
##  [5,] 0.9614487 0.038551268
##  [6,] 0.9588092 0.041190775
##  [7,] 0.9377211 0.062278922
##  [8,] 0.9134910 0.086509021
##  [9,] 0.8625721 0.137427893
## [10,] 0.8254314 0.174568631
## [11,] 0.7681499 0.231850119
## [12,] 0.7173018 0.282698208
## [13,] 0.6756769 0.324323057
## [14,] 0.6306769 0.369323114
## [15,] 0.5968982 0.403101815
## [16,] 0.5643771 0.435622867
## [17,] 0.5380624 0.461937591
## [18,] 0.5178299 0.482170088
## [19,] 0.5033850 0.496614955
## [20,] 0.4933082 0.506691830
## [21,] 0.4867732 0.513226817
## [22,] 0.4843880 0.515611965
## [23,] 0.4855313 0.514468711
## [24,] 0.4899476 0.510052413
## [25,] 0.4961818 0.503818245
## [26,] 0.5030985 0.496901511
## [27,] 0.5102635 0.489736522
## [28,] 0.5170835 0.482916510
## [29,] 0.5228099 0.477190125
## [30,] 0.5271072 0.472892785
## [31,] 0.5294044 0.470595590
## [32,] 0.5297320 0.470268018
## [33,] 0.5281879 0.471812079
## [34,] 0.5249426 0.475057380
## [35,] 0.5204132 0.479586819
## [36,] 0.5149487 0.485051263
## [37,] 0.5088834 0.491116581
## [38,] 0.5025700 0.497429955
## [39,] 0.4964771 0.503522861
## [40,] 0.4909221 0.509077920
## [41,] 0.4861755 0.513824529
## [42,] 0.4823865 0.517613461
## [43,] 0.4796081 0.520391866
## [44,] 0.4779120 0.522088020
## [45,] 0.4772869 0.522713081
## [46,] 0.4776174 0.522382632
## [47,] 0.4787252 0.521274770
## [48,] 0.4804009 0.519599086
## [49,] 0.4824317 0.517568315
## [50,] 0.4846137 0.515386330
## 
## $wind_speed
##         meantemp wind_speed
##  [1,] 0.07585475  0.9241452
##  [2,] 0.08405805  0.9159420
##  [3,] 0.10823066  0.8917693
##  [4,] 0.12850993  0.8714901
##  [5,] 0.12843197  0.8715680
##  [6,] 0.16336265  0.8366374
##  [7,] 0.17888075  0.8211192
##  [8,] 0.18094166  0.8190583
##  [9,] 0.18051164  0.8194884
## [10,] 0.17419141  0.8258086
## [11,] 0.16695287  0.8330471
## [12,] 0.16518175  0.8348183
## [13,] 0.16507974  0.8349203
## [14,] 0.16645482  0.8335452
## [15,] 0.16453433  0.8354657
## [16,] 0.16225321  0.8377468
## [17,] 0.16407736  0.8359226
## [18,] 0.17090974  0.8290903
## [19,] 0.17604569  0.8239543
## [20,] 0.17960182  0.8203982
## [21,] 0.18265705  0.8173429
## [22,] 0.18690905  0.8130910
## [23,] 0.19438529  0.8056147
## [24,] 0.20045512  0.7995449
## [25,] 0.20333657  0.7966634
## [26,] 0.20576390  0.7942361
## [27,] 0.20819005  0.7918100
## [28,] 0.21018765  0.7898124
## [29,] 0.21158046  0.7884195
## [30,] 0.21171277  0.7882872
## [31,] 0.21107900  0.7889210
## [32,] 0.21025594  0.7897441
## [33,] 0.20925443  0.7907456
## [34,] 0.20816943  0.7918306
## [35,] 0.20728517  0.7927148
## [36,] 0.20647236  0.7935276
## [37,] 0.20573784  0.7942622
## [38,] 0.20529778  0.7947022
## [39,] 0.20534235  0.7946577
## [40,] 0.20599918  0.7940008
## [41,] 0.20698159  0.7930184
## [42,] 0.20802076  0.7919792
## [43,] 0.20925384  0.7907462
## [44,] 0.21078009  0.7892199
## [45,] 0.21247569  0.7875243
## [46,] 0.21413925  0.7858607
## [47,] 0.21558787  0.7844121
## [48,] 0.21681327  0.7831867
## [49,] 0.21789182  0.7821082
## [50,] 0.21877383  0.7812262
plot(FEVD1)

6.3 Feedforward Neural Network

6.3.1 Forecast

6.3.1.1 wind_speed

# Fix the RNG seed so the averaged neural-net fit is reproducible.
set.seed(34)
# nnetar() takes a numeric vector or time series as input (see ?nnetar).
# By default it fits multiple neural networks and averages their results;
# the xreg option accepts numeric external regressors only.
# (Fixed: use `<-` for assignment, per R style.)
fit_windspeed <- nnetar(ts_train_windspeed)
fit_windspeed
## Series: ts_train_windspeed 
## Model:  NNAR(1,1,2)[365] 
## Call:   nnetar(y = ts_train_windspeed)
## 
## Average of 20 networks, each of which is
## a 2-2-1 network with 9 weights
## options were - linear output units 
## 
## sigma^2 estimated as 14.78
# Forecast 114 days ahead with prediction intervals.
# (Fixed: spell out PI = TRUE — `T` is a reassignable shorthand.)
forecast_windspeed <- forecast(fit_windspeed, h = 114, PI = TRUE)
#forecast_windspeed

6.3.1.2 meantemp

# Fit the averaged neural-net autoregression to the meantemp series.
# (Fixed: use `<-` for assignment, per R style.)
fit_meantemp <- nnetar(ts_train_meantemp)
fit_meantemp
## Series: ts_train_meantemp 
## Model:  NNAR(11,1,6)[365] 
## Call:   nnetar(y = ts_train_meantemp)
## 
## Average of 20 networks, each of which is
## a 12-6-1 network with 85 weights
## options were - linear output units 
## 
## sigma^2 estimated as 1.884
# Forecast 114 days ahead with prediction intervals.
# (Fixed: spell out PI = TRUE — `T` is a reassignable shorthand.)
forecast_meantemp <- forecast(fit_meantemp, h = 114, PI = TRUE)
#forecast_meantemp

6.3.2 Evaluate

6.3.2.1 meantemp

# Extract the NN point forecasts and align them with the held-out test series.
predicted <- as.numeric(forecast_meantemp$mean)
actual <- as.numeric(ts_test_meantemp)
# Root mean squared error of the NN forecast.
# NOTE(review): rmse()/mae() are not provided by any package attached in the
# visible setup chunk (presumably the Metrics package) — confirm.
RMSE_meantemp_NN <- rmse(predicted, actual)
RMSE_meantemp_NN
## [1] 3.134642
# Mean absolute error of the NN forecast.
MAE_meantemp_NN <- mae(predicted, actual)
MAE_meantemp_NN
## [1] 2.456404
# Coefficient of determination: the squared Pearson correlation of x and y.
rsq <- function(x, y) {
  cor(x, y)^2
}

# R^2 between actual and predicted values (cor() is symmetric, so the
# argument order does not change the result).
RSQ_meantemp_NN <- rsq(actual, predicted)
RSQ_meantemp_NN
## [1] 0.7723737

6.3.2.2 wind_speed

# Same evaluation for the wind-speed NN forecast: RMSE, MAE, and R^2
# against the held-out test series.
predicted <- as.numeric(forecast_windspeed$mean)
actual <- as.numeric(ts_test_windspeed)
RMSE_windspeed_NN <- rmse(predicted, actual)
RMSE_windspeed_NN
## [1] 3.544034
MAE_windspeed_NN <- mae(predicted, actual)
MAE_windspeed_NN
## [1] 2.761422
RSQ_windspeed_NN <- rsq(actual, predicted)
RSQ_windspeed_NN
## [1] 0.05910477

We now present tables reporting the evaluation metrics for both time series.

6.3.2.3 meantemp

# Collect the meantemp NN metrics into a one-row matrix for display.
d <- cbind(R2 = RSQ_meantemp_NN, RMSE = RMSE_meantemp_NN, MAE = MAE_meantemp_NN)
# at most 4 decimal places
knitr::kable(d, digits = 4)
R2 RMSE MAE
0.7724 3.1346 2.4564

6.3.2.4 wind_speed

# Collect the wind-speed NN metrics into a one-row matrix for display.
d <- cbind(R2 = RSQ_windspeed_NN, RMSE = RMSE_windspeed_NN, MAE = MAE_windspeed_NN)
# at most 4 decimal places
knitr::kable(d, digits = 4)
R2 RMSE MAE
0.0591 3.544 2.7614

6.3.3 Forecast Plots

First we plot forecasts based on the model being trained on the training data.

# Fan charts of the NN forecasts, overlaid with the actual test series
# for visual comparison.
forecast_windspeed |> autoplot() + autolayer(ts_test_windspeed)

forecast_meantemp |> autoplot() + autolayer(ts_test_meantemp)

Then, we plot the prediction of test data alongside the actual test data.

# Re-index the test series and NN predictions as xts objects on the test
# dates so ts_plot() overlays them on a calendar axis (meantemp first).
xts_temp <- xts(ts_test_meantemp, order.by=df_test$date, "%Y-%m-%d")
xts_temp_2 <- xts(forecast_meantemp$mean, order.by=df_test$date, "%Y-%m-%d")

ts_plot(xts_temp, xts_temp_2)

# Same actual-vs-predicted comparison for wind speed.
xts_temp <- xts(ts_test_windspeed, order.by=df_test$date, "%Y-%m-%d")
xts_temp_2 <- xts(forecast_windspeed$mean, order.by=df_test$date, "%Y-%m-%d")

ts_plot(xts_temp, xts_temp_2)

6.4 LSTM Neural Network

The long-short term memory model is used in the following.

# Center/scale parameters of a numeric series, as a named vector
# c(mean = ..., sd = ...), later used to z-score and un-z-score data.
get_scaling_factors <- function(data) {
  c(mean = mean(data), sd = sd(data))
}

# z-score a series using precomputed scaling factors c(mean, sd), or
# invert the transformation when reverse = TRUE. Always returns a matrix
# (the shape the keras reshaping helpers expect).
normalize_data <- function(data, scaling_factors, reverse = FALSE) {
  scaled <- if (reverse) {
    (data * scaling_factors[2]) + scaling_factors[1]
  } else {
    (data - scaling_factors[1]) / scaling_factors[2]
  }
  as.matrix(scaled)
}


# Reshape an (n x 1) matrix of observations into the 3-D array layout
# keras LSTM layers expect: (samples, timesteps, features = 1).
#
# data  (n x 1) numeric matrix (output of normalize_data)
# x     TRUE  -> build predictor windows of length `lag`
#       FALSE -> build target windows of length `pred`, offset by `lag`
# lag   input window length
# pred  forecast horizon / target window length
#
# Returns a numeric array of dim (samples, lag, 1) or (samples, pred, 1).
kerasize_data <- function(data, x = TRUE, lag = 114, pred = 114) {

  n <- NROW(data)

  if (x) {

    # seq_len() is safe when the series is too short (yields an empty
    # sequence instead of 1:0).
    windows <- t(sapply(
      seq_len(n - lag - pred + 1),
      function(i) data[i:(i + lag - 1), 1]
    ))

    out <- array(as.numeric(windows), dim = c(nrow(windows), lag, 1))

  } else {

    # BUG FIX: the target window must have length `pred`, not `lag`.
    # The original sliced lag-long windows and reshaped them into a
    # (samples, pred, 1) array; array() silently truncates/recycles, so
    # this only happened to work because lag == pred in this analysis.
    windows <- t(sapply(
      (1 + lag):(n - pred + 1),
      function(i) data[i:(i + pred - 1), 1]
    ))

    out <- array(as.numeric(windows), dim = c(nrow(windows), pred, 1))

  }

  out

}

# Build the (1, lag, 1) input array for out-of-sample prediction from the
# most recent observations of `data`.
#
# BUG FIX: the model input window has length `lag`, not `pred`. The
# original sliced the last `pred` points into a (1, lag, 1) array, which
# array() silently recycles when lag != pred; it only worked here because
# lag == pred == 114.
kerasize_pred_input <- function(data, lag = 114, pred = 114){
  n <- NROW(data)
  recent <- data[(n - lag + 1):n]
  # NOTE(review): callers in this document pass an already-normalized
  # series, so this second normalization is numerically a no-op; it is
  # kept to preserve the original contract for raw inputs.
  recent <- normalize_data(recent, get_scaling_factors(data))
  array(recent, c(1, lag, 1))
}
# Fit a stacked, stateful LSTM on kerasized (samples, timesteps, 1) arrays.
#
# x, y    3-D input/target arrays (see kerasize_data)
# units   size of the first LSTM layer. BUG FIX: this argument was
#         declared but ignored — the layer hard-coded 128.
# batch   batch size; stateful LSTMs require a fixed batch_input_shape
# epochs  number of training epochs
# rate    dropout rate applied after each LSTM layer
# seed    random seed for reproducible weight init and training
# units2  size of the second LSTM layer (new argument; defaults to the
#         previously hard-coded 64, so existing calls are unchanged)
#
# Returns a list with the fitted model plus the shape metadata
# (batch, lag, pred) that lstm_forecast() needs.
lstm_build_model <- function(x, y, units = 128, batch = 1, epochs = 20,
                             rate = 0.2, seed = 2137, units2 = 64){

  lag <- dim(x)[2]

  lstm_model <- keras_model_sequential()

  lstm_model %>%
    layer_lstm(units = units
               ,batch_input_shape = c(batch, lag, 1)
               ,return_sequences = TRUE
               ,stateful = TRUE) %>%
    layer_dropout(rate = rate) %>%
    layer_lstm(units = units2
               ,return_sequences = TRUE
               ,stateful = TRUE) %>%
    layer_dropout(rate = rate) %>%
    time_distributed(layer_dense(units = 1))

  # NOTE(review): 'accuracy' is a classification metric and is not
  # meaningful for mean-squared-error regression; kept for output
  # compatibility, but consider removing it.
  lstm_model %>%
    compile(loss = 'mean_squared_error'
            ,optimizer = 'adam'
            ,metrics = 'accuracy')

  # Seed both R and TensorFlow RNGs so the fit is reproducible.
  tensorflow::set_random_seed(seed)
  lstm_model %>% fit(
    x = x
    ,y = y
    ,batch_size = batch
    ,epochs = epochs
    ,verbose = 0
    ,shuffle = FALSE)  # stateful training: preserve sample order

  out <- list(
    model = lstm_model
    ,x = x
    ,batch = batch
    ,lag = lag
    ,pred = dim(y)[2]
  )
  return(out)

}
# Predict with a fitted LSTM (as returned by lstm_build_model) and map
# the output back to the original scale.
#
# x_test           (1, lag, 1) input array (see kerasize_pred_input)
# model            list holding the keras model and its batch size
# scaling_factors  c(mean, sd) used to de-normalize the predictions
#
# Returns a list with the de-normalized forecast and the scaling factors.
lstm_forecast <- function(x_test, model, scaling_factors){

  batch_size <- model$batch

  raw <- predict(model$model, x_test, batch_size = batch_size)
  # Drop the trailing feature dimension, then undo the z-scoring.
  point_preds <- raw[, , 1]
  forecast_vals <- normalize_data(point_preds,
                                  scaling_factors = scaling_factors,
                                  reverse = TRUE)

  list(
    forecast = forecast_vals,
    scaling_factors = scaling_factors
  )

}
# remove the first row of df_test, as it is common with df_train
#df_test <- df_test[-1,] |> head(1)
#data <- as.data.frame(rbind(df_train, df_test))
#data <- merge(df_train, df_test, all=TRUE)

# Rebuild the full (train + test) meantemp series so windows spanning the
# train/test boundary are available to the supervised reshaping below.
data_meantemp <- ts(c(ts_train_meantemp, ts_test_meantemp), 
           start = start(ts_train_meantemp), 
           frequency = frequency(ts_test_meantemp)) 
# z-score the series; the same factors are reused later to invert predictions.
scaling_factors <- get_scaling_factors(data_meantemp)
data_meantemp_norm <- normalize_data(data_meantemp, scaling_factors)

# Reshape into (samples, timesteps, 1) arrays, fit the LSTM, and forecast.
x_data <- kerasize_data(data_meantemp_norm, x = TRUE, lag = 114, pred = 114)
y_data <- kerasize_data(data_meantemp_norm, x = FALSE, lag = 114, pred = 114)
x_test <- kerasize_pred_input(data_meantemp_norm, lag = 114, pred = 114)
model <- lstm_build_model(x_data, y_data)
prediction <- lstm_forecast(x_test, model, scaling_factors)
# If whole data was predicted
#temp <- as.data.frame(rbind(df_train,df_test))
#xts_meantemp_pred <- xts(prediction$forecast, #order.by=temp$date, "%Y-%m-%d")
#xts_all <- xts(temp$meantemp, order.by=temp$date, #"%Y-%m-%d")
#ts_plot(xts_meantemp_pred, xts_all)
# Plot actual vs. LSTM-predicted meantemp over the test dates.
xts_temp <- xts(ts_test_meantemp, 
                order.by=df_test$date, 
                "%Y-%m-%d")
xts_temp_2 <- xts(prediction$forecast, 
                  order.by=df_test$date, 
                  "%Y-%m-%d")
#xts_temp
#xts_temp_2
ts_plot(xts_temp, xts_temp_2)

# Score the LSTM meantemp forecast against the held-out test data.
predicted <- as.numeric(prediction$forecast)
actual <- as.numeric(ts_test_meantemp)
RMSE_meantemp_LSTM <- rmse(predicted, actual)
MAE_meantemp_LSTM <- mae(predicted, actual)
RSQ_meantemp_LSTM <- rsq(actual, predicted)
# Repeat the LSTM pipeline for wind speed: rebuild the full series,
# normalize, reshape, fit, forecast, and score against the test set.
data_windspeed <- ts(c(ts_train_windspeed, ts_test_windspeed), 
           start = start(ts_train_windspeed), 
           frequency = frequency(ts_test_windspeed))    
scaling_factors <- get_scaling_factors(data_windspeed)
data_windspeed_norm <- normalize_data(data_windspeed, scaling_factors)

x_data <- kerasize_data(data_windspeed_norm, x = TRUE, lag = 114, pred = 114)
y_data <- kerasize_data(data_windspeed_norm, x = FALSE, lag = 114, pred = 114)
x_test <- kerasize_pred_input(data_windspeed_norm, lag = 114, pred = 114)
model <- lstm_build_model(x_data, y_data)
prediction <- lstm_forecast(x_test, model, scaling_factors)
# Evaluate the wind-speed LSTM forecast.
predicted <- as.numeric(prediction$forecast)
actual <- as.numeric(ts_test_windspeed)
RMSE_windspeed_LSTM <- rmse(predicted, actual)
MAE_windspeed_LSTM <- mae(predicted, actual)
RSQ_windspeed_LSTM <- rsq(actual, predicted)

6.5 Compare Metrics

# Assemble each model's metrics (in RMSE, MAE, RSQ order) into a single
# comparison data frame.
metrics_list <- c("RMSE", "MAE", "RSQ")
ARMA_meantemp <- c(RMSE_ARMA, MAE_ARMA, RSQ_ARMA)

VAR_meantemp <- c(RMSE_meantemp_VAR, MAE_meantemp_VAR, RSQ_meantemp_VAR)
VAR_windspeed <- c(RMSE_windspeed_VAR, MAE_windspeed_VAR, RSQ_windspeed_VAR)

NN_meantemp <- c(RMSE_meantemp_NN, MAE_meantemp_NN, RSQ_meantemp_NN)
NN_windspeed <- c(RMSE_windspeed_NN, MAE_windspeed_NN, RSQ_windspeed_NN)

LSTM_meantemp <- c(RMSE_meantemp_LSTM,
                   MAE_meantemp_LSTM, 
                   RSQ_meantemp_LSTM)

LSTM_windspeed <- c(RMSE_windspeed_LSTM,
                   MAE_windspeed_LSTM, 
                   RSQ_windspeed_LSTM)

df_eval <- data.frame(metrics_list, ARMA_meantemp, VAR_meantemp,
                      VAR_windspeed, NN_meantemp, NN_windspeed,
                      LSTM_meantemp, LSTM_windspeed)

# round() is vectorized over data-frame columns, so the apply() call
# (which coerces the selection to a matrix) is unnecessary; vapply()
# gives a type-stable column mask.
num_cols <- vapply(df_eval, is.numeric, logical(1))
df_eval[, num_cols] <- round(df_eval[, num_cols], 3)

# gsub() is vectorized over character vectors — no lapply() needed.
# Strip the model prefix for display ("NN_meantemp" -> "meantemp").
colnames(df_eval) <- gsub("^.*_", "", colnames(df_eval))

Note that for the SARMA and Neural Network models the original data is used, while weekly aggregated data is used for the VAR model.

#table(df_eval) |> htmlTable
#knitr::kable(df_eval, col.names = names(df_eval))
#df_eval
# Render the comparison table with one column group per model family:
# n.cgroup = 1 metrics column + 1 ARMA + 2 VAR + 2 NN + 2 LSTM.
htmlTable(df_eval,
          digits = 3,
          cgroup = c("Metrics","ARMA","VAR","NN", "LSTM"),
          n.cgroup = c(1,1,2,2,2),
          
)
Metrics   ARMA   VAR   NN   LSTM
list   meantemp   meantemp windspeed   meantemp windspeed   meantemp windspeed
1 RMSE   4.299   73.532 19.908   3.135 3.544   11.626 4.672
2 MAE   3.576   57.972 14.475   2.456 2.761   10.068 3.738
3 RSQ   0.817   0.376 0.137   0.772 0.059   0.628 0